[IA64] slightly improve stability
author awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Mon, 8 May 2006 18:47:54 +0000 (12:47 -0600)
committer awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Mon, 8 May 2006 18:47:54 +0000 (12:47 -0600)
vcpu_ptc_e: fix flush order.
vcpu_ptc_g: fix typo (only local vcpu v-tlb was flushed)
itlb_pte/dtlb_pte removed.
vcpu_itr_* and vcpu_itc_no_srlz call vcpu_set_tr_entry coherently.
in_tpa parameter of vcpu_translate removed.
handle_lazy_cover is now static and unused 'isr' removed.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
xen/arch/ia64/asm-offsets.c
xen/arch/ia64/xen/hyperprivop.S
xen/arch/ia64/xen/process.c
xen/arch/ia64/xen/vcpu.c
xen/include/asm-ia64/domain.h
xen/include/asm-ia64/vcpu.h

index 0bc2463dfcc5fd0f8229ad0df1c2a34ebb6f789b..c593584655e3327325978d96858fcc2a3fbfe59c 100644 (file)
@@ -50,8 +50,6 @@ void foo(void)
        DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_saved_rr0));
        DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct vcpu, arch.breakimm));
        DEFINE(IA64_VCPU_IVA_OFFSET, offsetof (struct vcpu, arch.iva));
-       DEFINE(IA64_VCPU_DTLB_PTE_OFFSET, offsetof (struct vcpu, arch.dtlb_pte));
-       DEFINE(IA64_VCPU_ITLB_PTE_OFFSET, offsetof (struct vcpu, arch.itlb_pte));
        DEFINE(IA64_VCPU_IRR0_OFFSET, offsetof (struct vcpu, arch.irr[0]));
        DEFINE(IA64_VCPU_IRR3_OFFSET, offsetof (struct vcpu, arch.irr[3]));
        DEFINE(IA64_VCPU_INSVC3_OFFSET, offsetof (struct vcpu, arch.insvc[3]));
index f32264b1c7961d8a8867df438f031735b0d7fd05..1fec142a4e65b25910cc9a86674d631162374f76 100644 (file)
@@ -30,7 +30,7 @@
 #undef FAST_ITC        //XXX CONFIG_XEN_IA64_DOM0_VP
                //    TODO fast_itc doesn't suport dom0 vp yet.
 #else
-//#define FAST_ITC     // working but default off for now
+//#define FAST_ITC     // to be reviewed
 #endif
 #define FAST_BREAK
 #ifndef CONFIG_XEN_IA64_DOM0_VP
@@ -769,7 +769,7 @@ GLOBAL_ENTRY(fast_access_reflect)
 GLOBAL_ENTRY(fast_tlb_miss_reflect)
 #ifndef FAST_TLB_MISS_REFLECT // see beginning of file
        br.spnt.few page_fault ;;
-#endif
+#else
        mov r31=pr
        mov r30=cr.ipsr
        mov r29=cr.iip
@@ -1007,6 +1007,7 @@ page_not_present:
        mov r29=cr.iip
        mov r30=cr.ipsr
        br.sptk.many fast_reflect;;
+#endif
 END(fast_tlb_miss_reflect)
 
 // ensure that, if giving up, registers at entry to fast_hyperprivop unchanged
@@ -2003,7 +2004,7 @@ ENTRY(hyper_itc_i)
 ENTRY(hyper_itc_d)
 #ifndef FAST_ITC
        br.sptk.many dispatch_break_fault ;;
-#endif
+#else
        // ensure itir.ps >= xen's pagesize
        adds r23=XSI_ITIR_OFS-XSI_PSR_IC_OFS,r18 ;;
        ld8 r23=[r23];;
@@ -2040,7 +2041,9 @@ ENTRY(hyper_itc_d)
        movl r30=recover_and_dispatch_break_fault ;;
        mov r16=r8;;
        // fall through
+#endif
 
+#if defined(FAST_ITC) || defined (FAST_TLB_MISS_REFLECT)
 
 // fast_insert(PSCB(ifa),r24=ps,r16=pte)
 //     r16 == pte
@@ -2175,4 +2178,4 @@ no_inc_iip:
        rfi
        ;;
 END(fast_insert)
-
+#endif
index 29284e2d30729c850b562822836c3d21df532765..02f5c47b853a6cfcb5f2e871601081af7b359a65 100644 (file)
@@ -265,7 +265,8 @@ void deliver_pending_interrupt(struct pt_regs *regs)
 }
 unsigned long lazy_cover_count = 0;
 
-int handle_lazy_cover(struct vcpu *v, unsigned long isr, struct pt_regs *regs)
+static int
+handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
 {
        if (!PSCB(v,interrupt_collection_enabled)) {
                PSCB(v,ifs) = regs->cr_ifs;
@@ -285,7 +286,7 @@ void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_reg
        unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
        IA64FAULT fault;
 
-       if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, isr, regs)) return;
+       if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs)) return;
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
@@ -299,7 +300,7 @@ void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_reg
        }
 
  again:
-       fault = vcpu_translate(current,address,is_data,0,&pteval,&itir,&iha);
+       fault = vcpu_translate(current,address,is_data,&pteval,&itir,&iha);
        if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
                u64 logps;
                pteval = translate_domain_pte(pteval, address, itir, &logps);
@@ -813,7 +814,7 @@ printf("*** Handled privop masquerading as NaT fault\n");
                while(vector);
                return;
        }
-       if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, isr, regs)) return;
+       if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, regs)) return;
        PSCB(current,ifa) = ifa;
        PSCB(current,itir) = vcpu_get_itir_on_fault(v,ifa);
        reflect_interruption(isr,regs,vector);
index 2a293b79de98fe899112cdbeb654ceaf01323d57..9052010444f350fb7a4b5e6813f6718f37d15288 100644 (file)
@@ -1290,8 +1290,7 @@ static inline int vcpu_match_tr_entry(TR_ENTRY *trp, UINT64 ifa, UINT64 rid)
        return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
 }
 
-// in_tpa is not used when CONFIG_XEN_IA64_DOM0_VP
-IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, BOOLEAN in_tpa, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
+IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
 {
        unsigned long region = address >> 61;
        unsigned long pta, rid, rr;
@@ -1368,12 +1367,7 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, BOOLEAN in
        pte = trp->pte;
        if (/* is_data && */ pte.p
            && vcpu_match_tr_entry_no_p(trp,address,rid)) {
-#ifndef CONFIG_XEN_IA64_DOM0_VP
-               if (vcpu->domain==dom0 && !in_tpa)
-                       *pteval = pte.val;
-               else
-#endif
-               *pteval = vcpu->arch.dtlb_pte;
+               *pteval = pte.val;
                *itir = trp->itir;
                dtlb_translate_count++;
                return IA64_USE_TLB;
@@ -1422,7 +1416,7 @@ IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
        UINT64 pteval, itir, mask, iha;
        IA64FAULT fault;
 
-       fault = vcpu_translate(vcpu, vadr, TRUE, TRUE, &pteval, &itir, &iha);
+       fault = vcpu_translate(vcpu, vadr, TRUE, &pteval, &itir, &iha);
        if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB)
        {
                mask = itir_mask(itir);
@@ -1800,12 +1794,10 @@ void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64
        if ((mp_pte == -1UL) || (IorD & 0x4)) // don't place in 1-entry TLB
                return;
        if (IorD & 0x1) {
-               vcpu_set_tr_entry(&PSCBX(vcpu,itlb),pte,ps<<2,vaddr);
-               PSCBX(vcpu,itlb_pte) = mp_pte;
+               vcpu_set_tr_entry(&PSCBX(vcpu,itlb),mp_pte,ps<<2,vaddr);
        }
        if (IorD & 0x2) {
-               vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),pte,ps<<2,vaddr);
-               PSCBX(vcpu,dtlb_pte) = mp_pte;
+               vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),mp_pte,ps<<2,vaddr);
        }
 }
 
@@ -1882,13 +1874,15 @@ IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
        // architected loop to purge the entire TLB, should use
        //  base = stride1 = stride2 = 0, count0 = count 1 = 1
 
+       // just invalidate the "whole" tlb
+       vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
+       vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
+
 #ifdef VHPT_GLOBAL
        vhpt_flush();   // FIXME: This is overdoing it
 #endif
        local_flush_tlb_all();
-       // just invalidate the "whole" tlb
-       vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
-       vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
+
        return IA64_NO_FAULT;
 }
 
@@ -1915,8 +1909,8 @@ IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
 
                /* Purge TC entries.
                   FIXME: clear only if match.  */
-               vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
-               vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
+               vcpu_purge_tr_entry(&PSCBX(v,dtlb));
+               vcpu_purge_tr_entry(&PSCBX(v,itlb));
 
 #ifdef VHPT_GLOBAL
                /* Invalidate VHPT entries.  */
index f9c0bf86d20a81ed62a2f251b31ee8cff99966a8..cf819cbded105469b361f19f09e44c84d308236e 100644 (file)
@@ -69,8 +69,6 @@ struct arch_vcpu {
        TR_ENTRY dtlb;
        unsigned int itr_regions;
        unsigned int dtr_regions;
-       unsigned long itlb_pte;
-       unsigned long dtlb_pte;
        unsigned long irr[4];
        unsigned long insvc[4];
        unsigned long tc_regions;
index 28a011715283e81b50c91252315a6f1a2f1c77a2..3b03de0bc94a9fcb648281d2f2834967081304a4 100644 (file)
@@ -148,8 +148,7 @@ extern IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range);
 extern IA64FAULT vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 addr_range);
 extern IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
 extern IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
-extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address,
-                               BOOLEAN is_data, BOOLEAN in_tpa,
+extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data,
                                UINT64 *pteval, UINT64 *itir, UINT64 *iha);
 extern IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
 extern IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa);